return;
}
+static void relinquish_memory(struct domain *d, struct list_head *list)
+{
+    struct list_head *ent;
+    struct page_info *page;
+
+    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
+    spin_lock_recursive(&d->page_alloc_lock);
+
+    ent = list->next;
+    while ( ent != list )
+    {
+        page = list_entry(ent, struct page_info, list);
+
+        /* Grab a reference to the page so it won't disappear from under us. */
+        if ( unlikely(!get_page(page, d)) )
+        {
+            /* Couldn't get a reference -- someone is freeing this page. */
+            ent = ent->next;
+            continue;
+        }
+
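+        /* If the page was pinned, drop the reference that pinning took. */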
+        if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
+            put_page_and_type(page);
+
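+        /* Drop the reference held because the page is allocated to the domain. */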
+        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
+            put_page(page);
+
+        /* Follow the list chain and /then/ potentially free the page. */
+        ent = ent->next;
+        put_page(page);
+    }
+
+    spin_unlock_recursive(&d->page_alloc_lock);
+}
+
void domain_relinquish_resources(struct domain *d)
{
-    free_rma(d);
+    relinquish_memory(d, &d->page_list);
     free_extents(d);
+    return;
}
void arch_dump_domain_info(struct domain *d)
return 0;
}
-void free_rma(struct domain *d)
-{
-    if (d->arch.rma_page) {
-        free_domheap_pages(d->arch.rma_page, d->arch.rma_order);
-    }
-}
-
ulong pfn2mfn(struct domain *d, ulong pfn, int *type)
{
ulong rma_base_mfn = page_to_mfn(d->arch.rma_page);
while ( unlikely((y = cmpxchg(&page->count_info, x, nx)) != x) );
if ( unlikely((nx & PGC_count_mask) == 0) ) {
-        panic("about to free page: 0x%lx\n", page_to_mfn(page));
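+        /* Last reference gone: give the page back to the domain heap. */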
free_domheap_page(page);
}
}
#define mfn_to_gmfn(_d, mfn) (mfn)
extern int allocate_rma(struct domain *d, unsigned int order_pages);
-extern void free_rma(struct domain *d);
extern uint allocate_extents(struct domain *d, uint nrpages, uint rma_nrpages);
extern void free_extents(struct domain *d);